"""
The main purpose of this module is to expose LinkCollector.collect_links().
"""

import cgi
import functools
import itertools
import logging
import mimetypes
import os
import re
from collections import OrderedDict

from pip._vendor import html5lib, requests
from pip._vendor.distlib.compat import unescape
from pip._vendor.requests.exceptions import RetryError, SSLError
from pip._vendor.six.moves.urllib import parse as urllib_parse
from pip._vendor.six.moves.urllib import request as urllib_request

from pip._internal.exceptions import NetworkConnectionError
from pip._internal.models.link import Link
from pip._internal.models.search_scope import SearchScope
from pip._internal.network.utils import raise_for_status
from pip._internal.utils.filetypes import ARCHIVE_EXTENSIONS
from pip._internal.utils.misc import pairwise, redact_auth_from_url
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
from pip._internal.utils.urls import path_to_url, url_to_path
from pip._internal.vcs import is_url, vcs

if MYPY_CHECK_RUNNING:
    from optparse import Values
    from typing import (
        Callable, Iterable, List, MutableMapping, Optional,
        Protocol, Sequence, Tuple, TypeVar, Union,
    )
    import xml.etree.ElementTree

    from pip._vendor.requests import Response

    from pip._internal.network.session import PipSession

    HTMLElement = xml.etree.ElementTree.Element
    ResponseHeaders = MutableMapping[str, str]

    # Used in the @lru_cache polyfill.
    F = TypeVar('F')

    class LruCache(Protocol):
        def __call__(self, maxsize=None):
            # type: (Optional[int]) -> Callable[[F], F]
            raise NotImplementedError


logger = logging.getLogger(__name__)


# Fall back to noop_lru_cache in Python 2.
# TODO: this can be removed when Python 2 support is dropped!
def noop_lru_cache(maxsize=None):
    # type: (Optional[int]) -> Callable[[F], F]
    def _wrapper(f):
        # type: (F) -> F
        return f
    return _wrapper


_lru_cache = getattr(functools, "lru_cache", noop_lru_cache)  # type: LruCache


def _match_vcs_scheme(url):
    # type: (str) -> Optional[str]
    """Look for VCS schemes in the URL.

    Returns the matched VCS scheme, or None if there's no match.
    """
    for scheme in vcs.schemes:
        if url.lower().startswith(scheme) and url[len(scheme)] in '+:':
            return scheme
    return None
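

# Illustrative sketch (not part of the original module): assuming "git" is
# among the registered vcs.schemes, the matching above behaves like this:
#
#     >>> _match_vcs_scheme('git+https://github.com/pypa/pip.git')
#     'git'
#     >>> _match_vcs_scheme('https://pypi.org/simple/') is None
#     True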


def _is_url_like_archive(url):
    # type: (str) -> bool
    """Return whether the URL looks like an archive.
    """
    filename = Link(url).filename
    for bad_ext in ARCHIVE_EXTENSIONS:
        if filename.endswith(bad_ext):
            return True
    return False
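

# Illustrative sketch (not part of the original module): assuming ".tar.gz"
# is among ARCHIVE_EXTENSIONS, the suffix check above gives:
#
#     >>> _is_url_like_archive('https://example.com/pkg-1.0.tar.gz')
#     True
#     >>> _is_url_like_archive('https://example.com/simple/pkg/')
#     False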


class _NotHTML(Exception):
    def __init__(self, content_type, request_desc):
        # type: (str, str) -> None
        super(_NotHTML, self).__init__(content_type, request_desc)
        self.content_type = content_type
        self.request_desc = request_desc


def _ensure_html_header(response):
    # type: (Response) -> None
    """Check the Content-Type header to ensure the response contains HTML.

    Raises `_NotHTML` if the content type is not text/html.
    """
    content_type = response.headers.get("Content-Type", "")
    if not content_type.lower().startswith("text/html"):
        raise _NotHTML(content_type, response.request.method)


class _NotHTTP(Exception):
    pass


def _ensure_html_response(url, session):
    # type: (str, PipSession) -> None
    """Send a HEAD request to the URL, and ensure the response contains HTML.

    Raises `_NotHTTP` if the URL is not available for a HEAD request, or
    `_NotHTML` if the content type is not text/html.
    """
    scheme, netloc, path, query, fragment = urllib_parse.urlsplit(url)
    if scheme not in {'http', 'https'}:
        raise _NotHTTP()

    resp = session.head(url, allow_redirects=True)
    raise_for_status(resp)

    _ensure_html_header(resp)


def _get_html_response(url, session):
    # type: (str, PipSession) -> Response
    """Access an HTML page with GET, and return the response.

    This consists of three parts:

    1. If the URL looks suspiciously like an archive, send a HEAD first to
       check the Content-Type is HTML, to avoid downloading a large file.
       Raise `_NotHTTP` if the content type cannot be determined, or
       `_NotHTML` if it is not HTML.
    2. Actually perform the request. Raise HTTP exceptions on network failures.
    3. Check the Content-Type header to make sure we got HTML, and raise
       `_NotHTML` otherwise.
    """
    if _is_url_like_archive(url):
        _ensure_html_response(url, session=session)

    logger.debug('Getting page %s', redact_auth_from_url(url))

    resp = session.get(
        url,
        headers={
            "Accept": "text/html",
            # We don't want to blindly return cached data for
            # /simple/, because authors generally expect that
            # twine upload && pip install will function, but if
            # they've done a pip install in the last ~10 minutes
            # it won't. Thus, by setting this to zero we will not
            # blindly use any cached data; however, the benefit of
            # using max-age=0 instead of no-cache is that we will
            # still support conditional requests, so we will still
            # minimize traffic sent in cases where the page hasn't
            # changed at all. We will just always incur the round
            # trip for the conditional GET now instead of only
            # once per 10 minutes.
            # For more information, please see pypa/pip#5670.
            "Cache-Control": "max-age=0",
        },
    )
    raise_for_status(resp)

    # The check for archives above only works if the URL ends with
    # something that looks like an archive. However, that is not a
    # requirement of a URL. Unless we issue a HEAD request on every
    # URL we cannot know ahead of time for sure if something is HTML
    # or not. However, we can check after we've downloaded it.
    _ensure_html_header(resp)

    return resp


def _get_encoding_from_headers(headers):
    # type: (ResponseHeaders) -> Optional[str]
    """Determine if we have any encoding information in our headers.
    """
    if headers and "Content-Type" in headers:
        content_type, params = cgi.parse_header(headers["Content-Type"])
        if "charset" in params:
            return params['charset']
    return None
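

# Illustrative sketch (not part of the original module): cgi.parse_header
# splits the media type from its parameters, so for example:
#
#     >>> _get_encoding_from_headers(
#     ...     {'Content-Type': 'text/html; charset=utf-8'})
#     'utf-8'
#     >>> _get_encoding_from_headers({'Content-Type': 'text/html'}) is None
#     True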


def _determine_base_url(document, page_url):
    # type: (HTMLElement, str) -> str
    """Determine the HTML document's base URL.

    This looks for a ``<base>`` tag in the HTML document. If present, its href
    attribute denotes the base URL of anchor tags in the document. If there is
    no such tag (or if it does not have a valid href attribute), the HTML
    file's URL is used as the base URL.

    :param document: An HTML document representation. The current
        implementation expects the result of ``html5lib.parse()``.
    :param page_url: The URL of the HTML document.
    """
    for base in document.findall(".//base"):
        href = base.get("href")
        if href is not None:
            return href
    return page_url
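

# Illustrative sketch (not part of the original module): a <base> tag wins
# over the page's own URL. The mirror URL below is made up.
#
#     >>> doc = html5lib.parse(
#     ...     b'<html><head><base href="https://mirror.example.org/">'
#     ...     b'</head></html>',
#     ...     namespaceHTMLElements=False,
#     ... )
#     >>> _determine_base_url(doc, 'https://example.com/simple/pkg/')
#     'https://mirror.example.org/'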


def _clean_url_path_part(part):
    # type: (str) -> str
    """
    Clean a "part" of a URL path (i.e. after splitting on "@" characters).
    """
    # We unquote prior to quoting to make sure nothing is double quoted.
    return urllib_parse.quote(urllib_parse.unquote(part))


def _clean_file_url_path(part):
    # type: (str) -> str
    """
    Clean the first part of a URL path that corresponds to a local
    filesystem path (i.e. the first part after splitting on "@" characters).
    """
    # We unquote prior to quoting to make sure nothing is double quoted.
    # Also, on Windows the path part might contain a drive letter which
    # should not be quoted. On Linux where drive letters do not
    # exist, the colon should be quoted. We rely on urllib.request
    # to do the right thing here.
    return urllib_request.pathname2url(urllib_request.url2pathname(part))


# These characters are treated as reserved when cleaning a path: "@" itself
# and the percent-encoding of "/" ("%2F", matched case-insensitively).
_reserved_chars_re = re.compile('(@|%2F)', re.IGNORECASE)


def _clean_url_path(path, is_local_path):
    # type: (str, bool) -> str
    """
    Clean the path portion of a URL.
    """
    if is_local_path:
        clean_func = _clean_file_url_path
    else:
        clean_func = _clean_url_path_part

    # Split on the reserved characters prior to cleaning so that
    # revision strings in VCS URLs are properly preserved.
    parts = _reserved_chars_re.split(path)

    cleaned_parts = []
    for to_clean, reserved in pairwise(itertools.chain(parts, [''])):
        cleaned_parts.append(clean_func(to_clean))
        # Normalize %xx escapes (e.g. %2f -> %2F)
        cleaned_parts.append(reserved.upper())

    return ''.join(cleaned_parts)
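

# Illustrative sketch (not part of the original module): pairwise() pairs
# each path segment with the reserved separator that follows it, so spaces
# get quoted while the "@" separating a VCS revision survives untouched:
#
#     >>> _clean_url_path('/a b/repo@v1.0', is_local_path=False)
#     '/a%20b/repo@v1.0'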


def _clean_link(url):
    # type: (str) -> str
    """
    Make sure a link is fully quoted.

    For example, if ' ' occurs in the URL, it is replaced with "%20",
    without double-quoting other characters.
    """
    # Split the URL into parts according to the general structure
    # `scheme://netloc/path;parameters?query#fragment`.
    result = urllib_parse.urlparse(url)
    # If the netloc is empty, then the URL refers to a local filesystem path.
    is_local_path = not result.netloc
    path = _clean_url_path(result.path, is_local_path=is_local_path)
    return urllib_parse.urlunparse(result._replace(path=path))
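

# Illustrative sketch (not part of the original module):
#
#     >>> _clean_link('https://example.com/some dir/pkg-1.0.tar.gz')
#     'https://example.com/some%20dir/pkg-1.0.tar.gz'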


def _create_link_from_element(
    anchor,    # type: HTMLElement
    page_url,  # type: str
    base_url,  # type: str
):
    # type: (...) -> Optional[Link]
    """
    Convert an anchor element in a simple repository page to a Link.
    """
    href = anchor.get("href")
    if not href:
        return None

    url = _clean_link(urllib_parse.urljoin(base_url, href))
    pyrequire = anchor.get('data-requires-python')
    pyrequire = unescape(pyrequire) if pyrequire else None

    yanked_reason = anchor.get('data-yanked')
    if yanked_reason:
        # This is a unicode string in Python 2 (and 3).
        yanked_reason = unescape(yanked_reason)

    link = Link(
        url,
        comes_from=page_url,
        requires_python=pyrequire,
        yanked_reason=yanked_reason,
    )

    return link


class CacheablePageContent(object):
    def __init__(self, page):
        # type: (HTMLPage) -> None
        assert page.cache_link_parsing
        self.page = page

    def __eq__(self, other):
        # type: (object) -> bool
        return (isinstance(other, type(self)) and
                self.page.url == other.page.url)

    def __hash__(self):
        # type: () -> int
        return hash(self.page.url)


def with_cached_html_pages(
    fn,  # type: Callable[[HTMLPage], Iterable[Link]]
):
    # type: (...) -> Callable[[HTMLPage], List[Link]]
    """
    Given a function that parses an Iterable[Link] from an HTMLPage, cache the
    function's result (keyed by CacheablePageContent), unless the HTMLPage
    `page` has `page.cache_link_parsing == False`.
    """

    @_lru_cache(maxsize=None)
    def wrapper(cacheable_page):
        # type: (CacheablePageContent) -> List[Link]
        return list(fn(cacheable_page.page))

    @functools.wraps(fn)
    def wrapper_wrapper(page):
        # type: (HTMLPage) -> List[Link]
        if page.cache_link_parsing:
            return wrapper(CacheablePageContent(page))
        return list(fn(page))

    return wrapper_wrapper


@with_cached_html_pages
def parse_links(page):
    # type: (HTMLPage) -> Iterable[Link]
    """
    Parse an HTML document, and yield its anchor elements as Link objects.
    """
    document = html5lib.parse(
        page.content,
        transport_encoding=page.encoding,
        namespaceHTMLElements=False,
    )

    url = page.url
    base_url = _determine_base_url(document, url)
    for anchor in document.findall(".//a"):
        link = _create_link_from_element(
            anchor,
            page_url=url,
            base_url=base_url,
        )
        if link is None:
            continue
        yield link


class HTMLPage(object):
    """Represents one page, along with its URL"""

    def __init__(
        self,
        content,                  # type: bytes
        encoding,                 # type: Optional[str]
        url,                      # type: str
        cache_link_parsing=True,  # type: bool
    ):
        # type: (...) -> None
        """
        :param encoding: the encoding to decode the given content.
        :param url: the URL from which the HTML was downloaded.
        :param cache_link_parsing: whether links parsed from this page's url
                                   should be cached. PyPI index urls should
                                   have this set to False, for example.
        """
        self.content = content
        self.encoding = encoding
        self.url = url
        self.cache_link_parsing = cache_link_parsing

    def __str__(self):
        # type: () -> str
        return redact_auth_from_url(self.url)
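

# Illustrative sketch (not part of the original module): building an HTMLPage
# by hand and feeding it to parse_links(). The URL and archive name below are
# made up.
#
#     >>> page = HTMLPage(
#     ...     content=b'<html><body>'
#     ...             b'<a href="pkg-1.0.tar.gz">pkg-1.0.tar.gz</a>'
#     ...             b'</body></html>',
#     ...     encoding=None,
#     ...     url='https://example.com/simple/pkg/',
#     ... )
#     >>> [link.url for link in parse_links(page)]
#     ['https://example.com/simple/pkg/pkg-1.0.tar.gz']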


def _handle_get_page_fail(
    link,  # type: Link
    reason,  # type: Union[str, Exception]
    meth=None  # type: Optional[Callable[..., None]]
):
    # type: (...) -> None
    if meth is None:
        meth = logger.debug
    meth("Could not fetch URL %s: %s - skipping", link, reason)


def _make_html_page(response, cache_link_parsing=True):
    # type: (Response, bool) -> HTMLPage
    encoding = _get_encoding_from_headers(response.headers)
    return HTMLPage(
        response.content,
        encoding=encoding,
        url=response.url,
        cache_link_parsing=cache_link_parsing)


def _get_html_page(link, session=None):
    # type: (Link, Optional[PipSession]) -> Optional[HTMLPage]
    if session is None:
        raise TypeError(
            "_get_html_page() missing 1 required keyword argument: 'session'"
        )

    url = link.url.split('#', 1)[0]

    # Check for VCS schemes that do not support lookup as web pages.
    vcs_scheme = _match_vcs_scheme(url)
    if vcs_scheme:
        logger.warning('Cannot look at %s URL %s because it does not support '
                       'lookup as web pages.', vcs_scheme, link)
        return None

    # Tack index.html onto file:// URLs that point to directories
    scheme, _, path, _, _, _ = urllib_parse.urlparse(url)
    if (scheme == 'file' and os.path.isdir(urllib_request.url2pathname(path))):
        # add trailing slash if not present so urljoin doesn't trim
        # final segment
        if not url.endswith('/'):
            url += '/'
        url = urllib_parse.urljoin(url, 'index.html')
        logger.debug(' file: URL is directory, getting %s', url)

    try:
        resp = _get_html_response(url, session=session)
    except _NotHTTP:
        logger.warning(
            'Skipping page %s because it looks like an archive, and cannot '
            'be checked by an HTTP HEAD request.', link,
        )
    except _NotHTML as exc:
        logger.warning(
            'Skipping page %s because the %s request got Content-Type: %s. '
            'The only supported Content-Type is text/html.',
            link, exc.request_desc, exc.content_type,
        )
    except NetworkConnectionError as exc:
        _handle_get_page_fail(link, exc)
    except RetryError as exc:
        _handle_get_page_fail(link, exc)
    except SSLError as exc:
        reason = "There was a problem confirming the ssl certificate: "
        reason += str(exc)
        _handle_get_page_fail(link, reason, meth=logger.info)
    except requests.ConnectionError as exc:
        _handle_get_page_fail(link, "connection error: {}".format(exc))
    except requests.Timeout:
        _handle_get_page_fail(link, "timed out")
    else:
        return _make_html_page(resp,
                               cache_link_parsing=link.cache_link_parsing)
    return None


def _remove_duplicate_links(links):
    # type: (Iterable[Link]) -> List[Link]
    """
    Return a list of links, with duplicates removed and ordering preserved.
    """
    # We preserve the ordering when removing duplicates because we can.
    return list(OrderedDict.fromkeys(links))
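

# Illustrative sketch (not part of the original module): OrderedDict keys are
# unique and keep insertion order; with strings standing in for Link objects:
#
#     >>> _remove_duplicate_links(['a', 'b', 'a'])
#     ['a', 'b']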


def group_locations(locations, expand_dir=False):
    # type: (Sequence[str], bool) -> Tuple[List[str], List[str]]
    """
    Divide a list of locations into two groups: "files" (archives) and "urls."

    :return: A pair of lists (files, urls).
    """
    files = []
    urls = []

    # puts the url for the given file path into the appropriate list
    def sort_path(path):
        # type: (str) -> None
        url = path_to_url(path)
        if mimetypes.guess_type(url, strict=False)[0] == 'text/html':
            urls.append(url)
        else:
            files.append(url)

    for url in locations:
        is_local_path = os.path.exists(url)
        is_file_url = url.startswith('file:')

        if is_local_path or is_file_url:
            if is_local_path:
                path = url
            else:
                path = url_to_path(url)
            if os.path.isdir(path):
                if expand_dir:
                    path = os.path.realpath(path)
                    for item in os.listdir(path):
                        sort_path(os.path.join(path, item))
                elif is_file_url:
                    urls.append(url)
                else:
                    logger.warning(
                        "Path '%s' is ignored: it is a directory.", path,
                    )
            elif os.path.isfile(path):
                sort_path(path)
            else:
                logger.warning(
                    "Url '%s' is ignored: it is neither a file "
                    "nor a directory.", url,
                )
        elif is_url(url):
            # Only add url with clear scheme
            urls.append(url)
        else:
            logger.warning(
                "Url '%s' is ignored. It is either a non-existing "
                "path or lacks a specific scheme.", url,
            )

    return files, urls
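

# Illustrative sketch (not part of the original module), assuming the wheel
# /tmp/wheels/pkg-1.0.whl exists locally:
#
#     >>> files, urls = group_locations(
#     ...     ['/tmp/wheels/pkg-1.0.whl', 'https://example.com/simple/'],
#     ... )
#     >>> files
#     ['file:///tmp/wheels/pkg-1.0.whl']
#     >>> urls
#     ['https://example.com/simple/']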


class CollectedLinks(object):

    """
    Encapsulates the return value of a call to LinkCollector.collect_links().

    The return value includes both URLs to project pages containing package
    links, as well as individual package Link objects collected from other
    sources.

    This info is stored separately as:

    (1) links from the configured file locations,
    (2) links from the configured find_links, and
    (3) urls to HTML project pages, as described by the PEP 503 simple
        repository API.
    """

    def __init__(
        self,
        files,         # type: List[Link]
        find_links,    # type: List[Link]
        project_urls,  # type: List[Link]
    ):
        # type: (...) -> None
        """
        :param files: Links from file locations.
        :param find_links: Links from find_links.
        :param project_urls: URLs to HTML project pages, as described by
            the PEP 503 simple repository API.
        """
        self.files = files
        self.find_links = find_links
        self.project_urls = project_urls


class LinkCollector(object):

    """
    Responsible for collecting Link objects from all configured locations,
    making network requests as needed.

    The class's main method is its collect_links() method.
    """

    def __init__(
        self,
        session,       # type: PipSession
        search_scope,  # type: SearchScope
    ):
        # type: (...) -> None
        self.search_scope = search_scope
        self.session = session

    @classmethod
    def create(cls, session, options, suppress_no_index=False):
        # type: (PipSession, Values, bool) -> LinkCollector
        """
        :param session: The Session to use to make requests.
        :param suppress_no_index: Whether to ignore the --no-index option
            when constructing the SearchScope object.
        """
        index_urls = [options.index_url] + options.extra_index_urls
        if options.no_index and not suppress_no_index:
            logger.debug(
                'Ignoring indexes: %s',
                ','.join(redact_auth_from_url(url) for url in index_urls),
            )
            index_urls = []

        # Make sure find_links is a list before passing to create().
        find_links = options.find_links or []

        search_scope = SearchScope.create(
            find_links=find_links, index_urls=index_urls,
        )
        link_collector = LinkCollector(
            session=session, search_scope=search_scope,
        )
        return link_collector

    @property
    def find_links(self):
        # type: () -> List[str]
        return self.search_scope.find_links

    def fetch_page(self, location):
        # type: (Link) -> Optional[HTMLPage]
        """
        Fetch an HTML page containing package links.
        """
        return _get_html_page(location, session=self.session)

    def collect_links(self, project_name):
        # type: (str) -> CollectedLinks
        """Find all available links for the given project name.

        :return: All the Link objects (unfiltered), as a CollectedLinks object.
        """
        search_scope = self.search_scope
        index_locations = search_scope.get_index_urls_locations(project_name)
        index_file_loc, index_url_loc = group_locations(index_locations)
        fl_file_loc, fl_url_loc = group_locations(
            self.find_links, expand_dir=True,
        )

        file_links = [
            Link(url) for url in itertools.chain(index_file_loc, fl_file_loc)
        ]

        # We trust every directly linked archive in find_links
        find_link_links = [Link(url, '-f') for url in self.find_links]

        # We trust every url that the user has given us whether it was given
        # via --index-url or --find-links.
        # We want to filter out anything that does not have a secure origin.
        url_locations = [
            link for link in itertools.chain(
                # Mark PyPI indices as "cache_link_parsing == False" -- this
                # will avoid caching the result of parsing the page for links.
                (Link(url, cache_link_parsing=False) for url in index_url_loc),
                (Link(url) for url in fl_url_loc),
            )
            if self.session.is_secure_origin(link)
        ]

        url_locations = _remove_duplicate_links(url_locations)
        lines = [
            '{} location(s) to search for versions of {}:'.format(
                len(url_locations), project_name,
            ),
        ]
        for link in url_locations:
            lines.append('* {}'.format(link))
        logger.debug('\n'.join(lines))

        return CollectedLinks(
            files=file_links,
            find_links=find_link_links,
            project_urls=url_locations,
        )
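

# Illustrative usage sketch (not part of the original module): wiring a
# LinkCollector together by hand and collecting links for a project. pip
# itself builds the session and search scope from command-line options via
# LinkCollector.create().
#
#     >>> from pip._internal.network.session import PipSession
#     >>> session = PipSession()
#     >>> search_scope = SearchScope.create(
#     ...     find_links=[], index_urls=['https://pypi.org/simple'],
#     ... )
#     >>> collector = LinkCollector(session=session, search_scope=search_scope)
#     >>> collected = collector.collect_links('pip')
#     >>> page = collector.fetch_page(collected.project_urls[0])
#     >>> links = parse_links(page) if page is not None else []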